Fit VBGE models to back-calculated length-at-age

Author

Max Lindmark, Jan Ohlberger, Anna Gårdmark

Published

April 7, 2025

Load libraries

library(tidyverse)
library(tidylog)
library(broom)
library(RColorBrewer)
library(viridis)
library(minpack.lm)
library(patchwork)
library(ggtext)
library(brms)
library(modelr)
library(tidybayes)
library(ggridges)
library(performance)

# devtools::install_github("seananderson/ggsidekick") ## not on CRAN
library(ggsidekick)
theme_set(theme_sleek())

# Load functions
home <- here::here()
# Source every helper script in R/functions (VBGF/Gompertz fitting helpers
# used below). lapply() instead of sapply(): the results are discarded, so
# sapply()'s type-unstable simplification is unnecessary; invisible()
# suppresses the returned list.
fxn <- list.files(paste0(home, "/R/functions"))
invisible(lapply(paste0(home, "/R/functions/", fxn), source))

Read and trim data

# Back-calculated length-at-age data. The local copy is commented out in
# favor of the published GitHub version so the script is reproducible
# without the private data directory.
d <- # read_csv(paste0(home, "/data/for-analysis/dat.csv")) |>
  read_csv("https://raw.githubusercontent.com/maxlindmark/perch-growth/master/data/clean/dat.csv") |>
  filter(
    age_ring == "Y", # use only length-at-age by filtering on age_ring
    !area %in% c("VN", "TH") # drop areas excluded from the analysis
  )
Rows: 338460 Columns: 12
── Column specification ────────────────────────────────────────────────────────
Delimiter: ","
chr (6): age_ring, area, gear, ID, sex, keep
dbl (6): length_mm, age_bc, age_catch, catch_year, cohort, final_length

ℹ Use `spec()` to retrieve the full column specification for this data.
ℹ Specify the column types or set `show_col_types = FALSE` to quiet this message.
filter: removed 43,886 rows (13%), 294,574 rows remaining
# Keep only area x cohort combinations with more than 100 observations
d <- d |>
  mutate(area_cohort = as.factor(paste(area, cohort))) |>
  filter(n() > 100, .by = area_cohort)
group_by: one grouping variable (area_cohort)
filter (grouped): removed 2,637 rows (1%), 291,937 rows remaining
ungroup: no grouping variables
# Keep only area x cohort x age combinations with more than 10 observations
d <- d |>
  mutate(area_cohort_age = as.factor(paste(area, cohort, age_bc))) |>
  filter(n() > 10, .by = area_cohort_age)
group_by: one grouping variable (area_cohort_age)
filter (grouped): removed 3,521 rows (1%), 288,416 rows remaining
ungroup: no grouping variables
# Minimum number of cohorts in a given area: count distinct area-cohort
# pairs, then keep areas represented by at least 10 cohorts
cnt <- d |>
  distinct(area, cohort) |>
  count(area) |>
  filter(n >= 10)
group_by: one grouping variable (area)
summarise: now 10 rows and 2 columns, ungrouped
filter: no rows removed
d <- d[d$area %in% cnt$area, ] # keep only areas with at least 10 cohorts

# Plot cleaned data: jittered length-at-age per area
ggplot(d, aes(age_bc, length_mm)) +
  geom_jitter(size = 0.1, height = 0, alpha = 0.1) +
  scale_x_continuous(breaks = seq(20)) +
  labs(x = "Age", y = "Length (mm)") +
  guides(color = "none") +
  theme(
    axis.text.x = element_text(angle = 0),
    axis.text = element_text(size = 12),
    axis.title = element_text(size = 15)
  ) +
  facet_wrap(~area, scale = "free_x")

# Longitude and latitude attributes for each area.
# Built directly with data.frame() so lat/lon stay numeric; the previous
# data.frame(cbind(...)) coerced all columns to character via an
# intermediate matrix and needed a mutate_at() round-trip to undo it.
area <- c("BS", "BT", "FB", "FM", "HO", "JM", "MU", "RA", "SI_EK", "SI_HA", "TH")
nareas <- length(area)
lat <- c(60, 60.4, 60.3, 60.5, 63.7, 58, 59, 65.9, 57.3, 57.4, 56.1)
lon <- c(21.5, 18.1, 19.5, 18, 20.9, 16.8, 18.1, 22.3, 16.6, 16.7, 15.9)
area_attr <- data.frame(area = area, lat = lat, lon = lon)
mutate_at: converted 'lat' from character to double (0 new NA)
           converted 'lon' from character to double (0 new NA)

Fit VBGE models

# Get individual growth parameters (functions: VBGF/Gompertz,nls_out,fit_nls)
# One nonlinear VBGF fit per individual; fit_nls() requires at least
# min_nage ages per fish, and nls_out() tidies the coefficients into
# one row per individual (NA when the fit fails or too few ages).
IVBG <- d |>
  group_by(ID) |>
  summarize(nls_out(fit_nls(length_mm, age_bc, min_nage = 5, model = "VBGF"))) |>
  ungroup()
group_by: one grouping variable (ID)
summarize: now 75,245 rows and 5 columns, ungrouped
ungroup: no grouping variables
IVBG <- IVBG |> drop_na(k) # The NAs are because the model didn't converge or because they were below the threshold age
drop_na: removed 51,635 rows (69%), 23,610 rows remaining
IVBG <- IVBG |>
  mutate(
    # Wald-type 95% intervals from the per-individual nls standard errors
    k_lwr = k - 1.96 * k_se,
    k_upr = k + 1.96 * k_se,
    linf_lwr = linf - 1.96 * linf_se,
    linf_upr = linf + 1.96 * linf_se,
    # Size-corrected growth coefficient A = k * Linf * 0.65.
    # NOTE(review): 0.65 is a hard-coded constant not derivable from this
    # file — document its source (presumably from the VBGE literature).
    A = k * linf * 0.65,
    row_id = row_number()
  )
mutate: new variable 'k_lwr' (double) with 23,608 unique values and 0% NA
        new variable 'k_upr' (double) with 23,608 unique values and 0% NA
        new variable 'linf_lwr' (double) with 23,608 unique values and 0% NA
        new variable 'linf_upr' (double) with 23,608 unique values and 0% NA
        new variable 'A' (double) with 23,608 unique values and 0% NA
        new variable 'row_id' (integer) with 23,610 unique values and 0% NA
# Add back cohort and area variables.
# Explicit `by = "ID"` documents the join key and silences the implicit-join
# message. Each individual in IVBG matches all of its rows in d, so IVBG is
# expanded from one row per individual to one row per length-at-age record
# (23,610 -> ~142k rows).
# NOTE(review): this fan-out means later per-cohort quantiles weight each
# individual by its number of observations — confirm that is intended.
IVBG <- IVBG |>
  left_join(d |> select(ID, area_cohort), by = "ID") |>
  separate(area_cohort, into = c("area", "cohort"), remove = TRUE, sep = " ") |>
  mutate(cohort = as.numeric(cohort))
select: dropped 12 variables (length_mm, age_bc, age_catch, age_ring, area, …)
Joining with `by = join_by(ID)`
left_join: added one column (area_cohort)
> rows only in x 0
> rows only in y (146,365)
> matched rows 142,051 (includes duplicates)
> =========
> rows total 142,051
mutate: converted 'cohort' from character to double (0 new NA)
# Compare how the means and quantiles differ depending on this filtering
# (drop the 5% of fits with the largest k standard errors).
# NOTE(review): IVBG_filter is not used further down in this script — it is
# kept only for the exploratory comparison described above.
IVBG_filter <- IVBG |>
  filter(k_se < quantile(k_se, probs = 0.95))
filter: removed 7,107 rows (5%), 134,944 rows remaining
# Summarize growth coefficients by cohort and area.
# Bug fix: summarize() evaluates expressions sequentially, so the original
# `k = quantile(k, 0.5)` overwrote the vector k with its scalar median BEFORE
# k_lwr/k_upr were computed — both bounds therefore equaled the median.
# Computing the median into k_med and renaming afterwards keeps the full
# vector of k values available for the 5%/95% quantiles.
VBG <- IVBG |>
  filter(k_se < k) |> # new!
  group_by(cohort, area) |>
  summarize(
    k_med = quantile(k, prob = 0.5, na.rm = TRUE),
    A = quantile(A, prob = 0.5, na.rm = TRUE),
    linf = quantile(linf, prob = 0.5, na.rm = TRUE),
    k_lwr = quantile(k, prob = 0.05, na.rm = TRUE),
    k_upr = quantile(k, prob = 0.95, na.rm = TRUE)
  ) |>
  rename(k = k_med) |>
  ungroup()
filter: removed 4,220 rows (3%), 137,831 rows remaining
group_by: 2 grouping variables (cohort, area)
summarize: now 306 rows and 7 columns, one group variable remaining (cohort)
ungroup: no grouping variables

Calculate sample size

# Per-area sample sizes: cohort span, unique individuals, and raw rows
# (rows = back-calculated length-at-age records after the join above)
sample_size <- IVBG |>
  group_by(area) |>
  summarise(
    n_cohort = n_distinct(cohort),
    min_cohort = min(cohort),
    max_cohort = max(cohort),
    n_individuals = n_distinct(ID),
    n_data_points = n()
  )
group_by: one grouping variable (area)
summarise: now 10 rows and 6 columns, ungrouped
sample_size
# A tibble: 10 × 6
   area  n_cohort min_cohort max_cohort n_individuals n_data_points
   <chr>    <int>      <dbl>      <dbl>         <int>         <int>
 1 BS          17       1985       2001          1333          7683
 2 BT          22       1972       1995           961          5371
 3 FB          47       1969       2015          6040         37381
 4 FM          37       1963       1999          5056         32637
 5 HO          29       1982       2015          1152          6220
 6 JM          60       1953       2014          4867         28744
 7 MU          18       1981       1999          1104          6666
 8 RA          14       1985       2003           573          3128
 9 SI_EK       24       1968       2011           659          3576
10 SI_HA       38       1967       2013          1865         10645
sample_size |>
  ungroup() |>
  summarise(sum_ind = sum(n_individuals), sum_n = sum(n_data_points))
ungroup: no grouping variables
summarise: now one row and 2 columns, ungrouped
# A tibble: 1 × 2
  sum_ind  sum_n
    <int>  <int>
1   23610 142051
write_csv(sample_size, paste0(home, "/output/sample_size.csv"))

Add GAM-predicted temperature to growth models

# GAM-predicted mean temperature per area and year; rename year -> cohort
# so it joins onto the cohort-level growth summaries below.
pred_temp <- read_csv(paste0(home, "/output/gam_predicted_temps.csv")) |>
  rename(cohort = year)
Rows: 380 Columns: 4
── Column specification ────────────────────────────────────────────────────────
Delimiter: ","
chr (2): area, model
dbl (2): year, temp

ℹ Use `spec()` to retrieve the full column specification for this data.
ℹ Specify the column types or set `show_col_types = FALSE` to quiet this message.
rename: renamed one variable (cohort)
VBG <- VBG |>
  left_join(pred_temp, by = c("area", "cohort"))
left_join: added 2 columns (temp, model)
           > rows only in x     0
           > rows only in y  ( 74)
           > matched rows     306
           >                 =====
           > rows total       306
# Save data for map-plot
cohort_sample_size <- IVBG |>
  group_by(area, cohort) |>
  summarise(n = n()) # NOTE(review): after the join above, IVBG has one row per length-at-age observation, so n() counts observations here — the previous "individuals, not samples!" note looks inverted; use n_distinct(ID) if individuals are intended
group_by: 2 grouping variables (area, cohort)
summarise: now 306 rows and 3 columns, one group variable remaining (area)
VBG <- left_join(VBG, cohort_sample_size, by = c("cohort", "area"))
left_join: added one column (n)
           > rows only in x     0
           > rows only in y  (  0)
           > matched rows     306
           >                 =====
           > rows total       306
write_csv(VBG, paste0(home, "/output/vbg.csv"))

# Calculate the plotting order (also used for map plot):
# areas ranked by mean predicted temperature, warmest first.
# Note: `order` masks base::order() for the remainder of this script.
order <- VBG |>
  ungroup() |>
  group_by(area) |>
  summarise(mean_temp = mean(temp)) |>
  arrange(desc(mean_temp))
ungroup: no grouping variables
group_by: one grouping variable (area)
summarise: now 10 rows and 2 columns, ungrouped
order
# A tibble: 10 × 2
   area  mean_temp
   <chr>     <dbl>
 1 SI_HA     13.6 
 2 BT        13.1 
 3 FM         8.90
 4 JM         8.77
 5 BS         8.44
 6 FB         8.16
 7 SI_EK      8.03
 8 MU         6.85
 9 HO         6.58
10 RA         6.50
write_csv(order, paste0(home, "/output/ranked_temps.csv"))

# One color per area from an RdYlBu ramp; two extra colors are generated and
# the two middle (brightest) ones dropped for legibility.
nareas <- length(unique(order$area)) + 2 # to skip the brightest colors that are hard to read
colors <- colorRampPalette(brewer.pal(name = "RdYlBu", n = 10))(nareas)[-c(6, 7)]

Plot VBGE fits

# Sample 30 IDs per area and plot their data and VBGE fits
# (seed fixed so the same individuals are drawn on every run)
set.seed(4)
ids <- IVBG |>
  distinct(ID, .keep_all = TRUE) |> # collapse to one row per individual before sampling
  group_by(area) |>
  slice_sample(n = 30)
distinct: removed 118,441 rows (83%), 23,610 rows remaining
group_by: one grouping variable (area)
slice_sample (grouped): removed 23,310 rows (99%), 300 rows remaining
# Parameter estimates (k, Linf) for the sampled individuals, one row each
IVBG2 <- IVBG |>
  semi_join(ids, by = "ID") |>
  distinct(ID, .keep_all = TRUE) |>
  select(ID, k, linf)
filter: removed 140,354 rows (99%), 1,697 rows remaining
distinct: removed 1,397 rows (82%), 300 rows remaining
select: dropped 10 variables (k_se, linf_se, k_lwr, k_upr, linf_lwr, …)
d2 <- d |>
  ungroup() |>
  filter(ID %in% ids$ID) |>
  left_join(IVBG2, by = "ID") |>
  # Predicted length from each individual's VBGF fit (t0 fixed at 0):
  # L(a) = Linf * (1 - exp(-k * a))
  mutate(length_mm_pred = linf * (1 - exp(-k * age_bc)))
ungroup: no grouping variables
filter: removed 286,719 rows (99%), 1,697 rows remaining
left_join: added 2 columns (k, linf)
           > rows only in x       0
           > rows only in y  (    0)
           > matched rows     1,697
           >                 =======
           > rows total       1,697
mutate: new variable 'length_mm_pred' (double) with 1,697 unique values and 0% NA
# Asterisk-flag SI_HA and BT in plot labels.
# NOTE(review): confirm what the asterisk denotes for these two areas
# (not stated in this file).
order <- order |> 
  mutate(area2 = ifelse(area %in% c("SI_HA", "BT"), paste0(area, "*"), area))
mutate: new variable 'area2' (character) with 10 unique values and 0% NA
# Same asterisk labelling applied to the plotted data
d2 <- d2 |> 
  mutate(area2 = ifelse(area %in% c("SI_HA", "BT"), paste0(area, "*"), area))
mutate: new variable 'area2' (character) with 10 unique values and 0% NA
# Observed lengths (jittered points) overlaid with each sampled individual's
# fitted VBGF curve; facets ordered warmest to coldest site
fits_ind <- ggplot(d2, aes(age_bc, length_mm, group = ID, color = ID)) +
  geom_jitter(width = 0.3, height = 0, alpha = 0.5, size = 0.4) +
  geom_line(aes(age_bc, length_mm_pred, group = ID, color = ID),
    inherit.aes = FALSE, alpha = 0.8, linewidth = 0.3
  ) +
  guides(color = "none") +
  scale_color_viridis(discrete = TRUE, name = "Site", option = "cividis") +
  scale_x_continuous(breaks = seq(1, 20, by = 2)) +
  labs(x = "Age (years)", y = "Length (mm)") +
  facet_wrap(~ factor(area2, order$area2), ncol = 5)

# Distribution of the growth coefficient A by site (violin + boxplot),
# sites ordered by mean temperature
A <- IVBG |>
  mutate(area = ifelse(area %in% c("SI_HA", "BT"), paste0(area, "*"), area)) |> 
  ggplot(aes(factor(area, order$area2), A, fill = factor(area, order$area2))) +
  coord_cartesian(ylim = c(22, 90)) + # zoom without dropping data from the stats
  geom_violin(alpha = 0.8, color = NA) +
  geom_boxplot(outlier.color = NA, width = 0.2, alpha = 0.9, fill = NA, size = 0.4) +
  scale_fill_manual(values = colors, name = "Site") +
  scale_color_manual(values = colors, name = "Site") +
  guides(fill = "none", color = "none") +
  labs(x = "Site", y = "Growth coefficient (*A*)") + # markdown italics rendered via ggtext
  theme(axis.title.y = ggtext::element_markdown())
mutate: changed 16,016 values (11%) of 'area' (0 new NA)
fits_ind / A + plot_annotation(tag_levels = "A") #+ plot_layout(heights = c(1, 1.8))

ggsave(paste0(home, "/figures/vb_pars_fits.pdf"), width = 16, height = 17, units = "cm")

# Supp plot for K and Linf
# k distribution by site; x-axis suppressed because this panel is stacked
# above the linf panel (k / linf below)
k <- IVBG |>
  ggplot(aes(factor(area, order$area), k, fill = factor(area, order$area))) +
  coord_cartesian(ylim = c(0, 0.7)) +
  geom_violin(alpha = 0.8, color = NA) +
  geom_boxplot(outlier.color = NA, width = 0.2, alpha = 0.9, fill = NA, size = 0.4) +
  scale_fill_manual(values = colors, name = "Site") +
  guides(fill = "none", color = "none") +
  theme(axis.title.x = element_blank(),
        axis.text.x = element_blank()) +
  labs(y = expression(italic(k)))

# Linf distribution by site, trimming extreme estimates before plotting.
# NOTE(review): the 130-2300 mm bounds are hard-coded plausibility limits —
# confirm their justification.
linf <- IVBG |>
  filter(linf < 2300) |>
  filter(linf > 130) |>
  mutate(area2 = ifelse(area %in% c("SI_HA", "BT"), paste0(area, "*"), area)) |> 
  ggplot(aes(factor(area2, order$area2), linf, fill = factor(area2, order$area2))) +
  geom_violin(alpha = 0.8, color = NA) +
  geom_boxplot(outlier.color = NA, width = 0.1, alpha = 0.9, fill = NA, size = 0.4) +
  coord_cartesian(ylim = c(0, 2000)) +
  scale_fill_manual(values = colors, name = "Site") +
  guides(fill = "none", color = "none") +
  labs(x = "Site", y = expression(paste(italic(L[infinity]), " [mm]")))
filter: removed 1,246 rows (1%), 140,805 rows remaining
filter: no rows removed
mutate: new variable 'area2' (character) with 10 unique values and 0% NA
k / linf

ggsave(paste0(home, "/figures/supp/vb_k_linf.pdf"), width = 17, height = 18, units = "cm")

Fit Models

Overall, \(A\) appears close to linearly related to temperature in most areas, even when GAM smoothers are added on top.

# Center and scale temperature (z-score across all rows) and add a quadratic
# term; area as a factor for use as a grouping term in the brms models
dat <- VBG |>
  mutate(
    temp_sc = (temp - mean(temp)) / sd(temp),
    temp_sq_sc = temp_sc^2,
    area_f = factor(area)
  )
mutate: new variable 'temp_sc' (double) with 294 unique values and 0% NA
        new variable 'temp_sq_sc' (double) with 294 unique values and 0% NA
        new variable 'area_f' (factor) with 10 unique values and 0% NA
# Non-scaled x-axis
labels <- ifelse(order$area %in% c("SI_HA", "BT"), paste0(order$area, "*"), order$area)

# Base scatter of A against temperature, colored by site (reused for the
# model-prediction panels below)
scatter <- ggplot(dat, aes(temp, A, color = factor(area, order$area)), size = 0.6) +
  geom_point(size = 0.9) +
  scale_color_manual(values = colors, name = "Site", labels = labels) +
  scale_fill_manual(values = colors, name = "Site", labels = labels)

# Quick visual check of the temperature-A relationship with a low-df GAM smoother
scatter +
  geom_smooth(method = "gam", formula = y ~ s(x, k = 3), se = FALSE)

Now we’ll compare a set of models with different temperature shapes and site effects

# Candidate models for A ~ temperature. All five fits share the same
# likelihood (Student-t, robust to outlying A values), priors, chains,
# iterations, and seed; only the formula and (for the hierarchical models)
# adapt_delta differ. The shared setup lives in one helper so the
# specifications cannot drift apart.
fit_A_model <- function(formula, adapt_delta = NULL) {
  brm(
    formula,
    data = dat,
    cores = 4,
    chains = 4,
    iter = 4000,
    family = student,
    prior = set_prior("normal(0,5)", class = "b"),
    save_pars = save_pars(all = TRUE), # required for moment_match in loo()
    # NULL reproduces brm()'s default sampler settings exactly
    control = if (!is.null(adapt_delta)) list(adapt_delta = adapt_delta),
    seed = 99
  )
}

# Quadratic effect of temperature
m1 <- fit_A_model(A ~ area_f + temp_sc + temp_sq_sc)

# Interaction between area and temperature
m2 <- fit_A_model(A ~ area_f * temp_sc)

# Interaction between area and temperature but common squared term
m3 <- fit_A_model(A ~ area_f * temp_sc + temp_sq_sc)

# Quadratic effect of temperature, full random
m4 <- fit_A_model(
  A ~ temp_sc + temp_sq_sc + (1 + temp_sc + temp_sq_sc | area_f),
  adapt_delta = 0.9
)

# random intercept and slope?
m5 <- fit_A_model(
  A ~ temp_sc + (1 + temp_sc | area_f),
  adapt_delta = 0.95
)

Compare all models with loo

loo(m1, m2, m3, m4, m5,
  moment_match = TRUE
)
Output of model 'm1':

Computed from 8000 by 306 log-likelihood matrix.

         Estimate   SE
elpd_loo   -886.8 16.6
p_loo        13.8  0.9
looic      1773.6 33.2
------
MCSE of elpd_loo is 0.0.
MCSE and ESS estimates assume MCMC draws (r_eff in [0.3, 1.6]).

All Pareto k estimates are good (k < 0.7).
See help('pareto-k-diagnostic') for details.

Output of model 'm2':

Computed from 8000 by 306 log-likelihood matrix.

         Estimate   SE
elpd_loo   -881.0 16.7
p_loo        20.8  1.7
looic      1761.9 33.5
------
MCSE of elpd_loo is 0.1.
MCSE and ESS estimates assume MCMC draws (r_eff in [0.3, 1.6]).

All Pareto k estimates are good (k < 0.7).
See help('pareto-k-diagnostic') for details.

Output of model 'm3':

Computed from 8000 by 306 log-likelihood matrix.

         Estimate   SE
elpd_loo   -881.7 16.7
p_loo        21.5  1.8
looic      1763.3 33.5
------
MCSE of elpd_loo is 0.1.
MCSE and ESS estimates assume MCMC draws (r_eff in [0.3, 1.6]).

All Pareto k estimates are good (k < 0.7).
See help('pareto-k-diagnostic') for details.

Output of model 'm4':

Computed from 8000 by 306 log-likelihood matrix.

         Estimate   SE
elpd_loo   -881.6 16.7
p_loo        20.9  2.1
looic      1763.3 33.4
------
MCSE of elpd_loo is 0.1.
MCSE and ESS estimates assume MCMC draws (r_eff in [0.5, 1.4]).

All Pareto k estimates are good (k < 0.7).
See help('pareto-k-diagnostic') for details.

Output of model 'm5':

Computed from 8000 by 306 log-likelihood matrix.

         Estimate   SE
elpd_loo   -881.2 16.7
p_loo        17.9  1.6
looic      1762.3 33.4
------
MCSE of elpd_loo is 0.1.
MCSE and ESS estimates assume MCMC draws (r_eff in [0.4, 1.4]).

All Pareto k estimates are good (k < 0.7).
See help('pareto-k-diagnostic') for details.

Model comparisons:
   elpd_diff se_diff
m2  0.0       0.0   
m5 -0.2       2.1   
m4 -0.7       2.4   
m3 -0.7       0.5   
m1 -5.9       4.1   

Plot the favorite model

performance::r2_bayes(m4)
# Bayesian R2 with Compatibility Interval

  Conditional R2: 0.269 (95% CI [0.196, 0.338])
     Marginal R2: 0.280 (95% CI [0.058, 0.494])
prior_summary(m4)
                   prior     class       coef  group resp dpar nlpar lb ub
             normal(0,5)         b                                        
             normal(0,5)         b    temp_sc                             
             normal(0,5)         b temp_sq_sc                             
 student_t(3, 43.4, 4.4) Intercept                                        
    lkj_corr_cholesky(1)         L                                        
    lkj_corr_cholesky(1)         L            area_f                      
           gamma(2, 0.1)        nu                                    1   
    student_t(3, 0, 4.4)        sd                                    0   
    student_t(3, 0, 4.4)        sd            area_f                  0   
    student_t(3, 0, 4.4)        sd  Intercept area_f                  0   
    student_t(3, 0, 4.4)        sd    temp_sc area_f                  0   
    student_t(3, 0, 4.4)        sd temp_sq_sc area_f                  0   
    student_t(3, 0, 4.4)     sigma                                    0   
       source
         user
 (vectorized)
 (vectorized)
      default
      default
 (vectorized)
      default
      default
 (vectorized)
 (vectorized)
 (vectorized)
 (vectorized)
      default
m4
 Family: student 
  Links: mu = identity; sigma = identity; nu = identity 
Formula: A ~ temp_sc + temp_sq_sc + (1 + temp_sc + temp_sq_sc | area_f) 
   Data: dat (Number of observations: 306) 
  Draws: 4 chains, each with iter = 4000; warmup = 2000; thin = 1;
         total post-warmup draws = 8000

Group-Level Effects: 
~area_f (Number of levels: 10) 
                          Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS
sd(Intercept)                 1.75      0.81     0.48     3.64 1.00     1857
sd(temp_sc)                   2.22      0.92     0.82     4.36 1.00     3073
sd(temp_sq_sc)                1.17      0.83     0.07     3.18 1.00     2012
cor(Intercept,temp_sc)        0.47      0.34    -0.33     0.94 1.00     3944
cor(Intercept,temp_sq_sc)     0.19      0.44    -0.70     0.90 1.00     4608
cor(temp_sc,temp_sq_sc)       0.45      0.41    -0.54     0.96 1.00     4699
                          Tail_ESS
sd(Intercept)                 2029
sd(temp_sc)                   3926
sd(temp_sq_sc)                2864
cor(Intercept,temp_sc)        4850
cor(Intercept,temp_sq_sc)     5324
cor(temp_sc,temp_sq_sc)       5730

Population-Level Effects: 
           Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
Intercept     44.00      0.73    42.51    45.43 1.00     3573     4301
temp_sc        3.02      0.98     1.11     5.10 1.00     3034     3764
temp_sq_sc    -0.03      0.66    -1.34     1.33 1.00     3105     3269

Family Specific Parameters: 
      Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
sigma     3.41      0.26     2.93     3.95 1.00     5002     5163
nu        5.97      2.43     3.17    12.24 1.00     5578     4897

Draws were sampled using sampling(NUTS). For each parameter, Bulk_ESS
and Tail_ESS are effective sample size measures, and Rhat is the potential
scale reduction factor on split chains (at convergence, Rhat = 1).
# Predict with the main model
# Area-specific expected values over each area's observed (scaled)
# temperature range
m4_pred <- dat |>
  group_by(area) |>
  data_grid(temp_sc = seq_range(temp_sc, n = 100)) |>
  ungroup() |>
  mutate(
    area_f = as.factor(area),
    # back-transform to degrees C with the same mean/sd used for scaling
    temp = (temp_sc * sd(VBG$temp)) + mean(VBG$temp),
    temp_sq_sc = temp_sc * temp_sc
  ) |>
  add_epred_draws(m4)

# Global prediction?
# Population-level expected values; re_formula = NA excludes the area-level
# random effects
m4_pred_glob <- dat |>
  data_grid(temp_sc = seq_range(temp_sc, n = 100)) |>
  mutate(
    temp = (temp_sc * sd(VBG$temp)) + mean(VBG$temp),
    temp_sq_sc = temp_sc * temp_sc
  ) |>
  add_epred_draws(m4, re_formula = NA)

# Plot area specific predictions as facets
# Two stat_lineribbon layers: the first draws the 95% ribbon (alpha 0.1),
# the second (.width = 0) overlays the opaque median line.
p0 <- scatter +
  stat_lineribbon(
    data = m4_pred,
    aes(temp,
      y = .epred,
      fill = factor(area, order$area)
    ),
    alpha = 0.1, .width = 0.95, linewidth = 0.9
  ) +
  stat_lineribbon(
    data = m4_pred,
    aes(temp,
      y = .epred,
      fill = factor(area, order$area)
    ),
    alpha = 1, .width = 0, linewidth = 0.9
  ) +
  guides(fill = "none", color = guide_legend(
    nrow = 1, reverse = TRUE,
    title.position = "top", title.hjust = 0.5,
    override.aes = list(fill = NA),
    position = "inside"
  )) +
  theme(
    legend.position.inside = c(0.5, 0.05), # legend placed inside the panel
    #legend.position = "bottom",
    legend.spacing.y = unit(0, "lines"),
    legend.spacing.x = unit(0, "lines"),
    legend.key.size = unit(0.25, "cm"),
    legend.text = element_text(size = 7),
    axis.title.y = ggtext::element_markdown()
  ) +
  labs(
    x = "Mean site temperature [°C]", y = "Size-corrected growth coefficient (*A*)",
    color = ""
  )

p0

ggsave(paste0(home, "/figures/a_conditional.pdf"), width = 12, height = 12, units = "cm")

# Binned plot?
# Average A and temperature within 5-year cohort bins per area to reduce
# cohort-to-cohort noise in the scatter
dat |> 
  mutate(tp = cut(cohort, breaks = seq(1950, 2015, by = 5))) |> 
  summarise(temp = mean(temp),
            A = mean(A),
            .by = c(area, tp)) |> 
  ggplot(aes(temp, A, color = factor(area, order$area)), size = 0.6) +
  geom_point(size = 0.9) +
  #geom_smooth(se = FALSE, formula = y~s(x, k = 3), method = "gam") +
  scale_color_manual(values = colors, name = "Site") +
  scale_fill_manual(values = colors, name = "Site")

# Q10 between 5 -- 15 C

# Global prediction?
# NOTE(review): this is byte-for-byte identical to the m4_pred_glob
# computed earlier — the recomputation is redundant and could be removed.
m4_pred_glob <- dat |>
  data_grid(temp_sc = seq_range(temp_sc, n = 100)) |>
  mutate(
    temp = (temp_sc * sd(VBG$temp)) + mean(VBG$temp),
    temp_sq_sc = temp_sc * temp_sc
  ) |>
  add_epred_draws(m4, re_formula = NA)

# Plot the population-level (global) prediction over the site scatter.
# (The earlier "area specific predictions as facets" wording was a
# copy-paste leftover — this uses m4_pred_glob with re_formula = NA.)
scatter +
  stat_lineribbon(data = m4_pred_glob,
                  aes(temp, y = .epred), inherit.aes = FALSE, color = "grey30", fill = "grey30",
                  alpha = 1, .width = 0, linewidth = 0.9)

# Q10 = (rate at 15 C / rate at 5 C)^(10 / (15 - 5)), computed per posterior
# draw from the population-level predictions, then summarized as the median
# with a 95% interval across draws
dat |>
  data_grid(temp = c(5, 15)) |>
  mutate(
    temp_sc = (temp - mean(VBG$temp)) / sd(VBG$temp), # same scaling as for fitting
    temp_sq_sc = temp_sc * temp_sc
  ) |>
  add_epred_draws(m4, re_formula = NA) |>
  ungroup() |>
  # keep only .draw and .epred alongside temp so the pivot pairs the two
  # temperatures within each draw
  dplyr::select(-temp_sc, -temp_sq_sc, -.chain, -.iteration, -.row) |>
  pivot_wider(values_from = ".epred", names_from = temp) |>
  mutate(q10 = (`15` / `5`)^(10/(15-5))) |> 
  summarise(median = median(q10),
            lwr = quantile(q10, probs = 0.025),
            upr = quantile(q10, probs = 0.975))
# A tibble: 1 × 3
  median   lwr   upr
   <dbl> <dbl> <dbl>
1   1.25  1.06  1.49
# Look at the slopes and intercepts by area
# Mean predicted temperature per area, used as the x-axis when relating
# the site-level random effects to climate
mean_temps <- pred_temp |>
  group_by(area) |>
  summarise(mean_temp = mean(temp))

# Site-specific intercepts (population intercept + random offset) against
# mean site temperature, with a cross-site linear trend.
# NOTE(review): `r_area_f[Intercept, ]` names the first index (the area
# level) "Intercept" and leaves the coefficient index blank — confirm this
# selects only the intercept offsets and does not pool the temp_sc /
# temp_sq_sc offsets as well.
p2 <- m4 |>
  spread_draws(b_Intercept, r_area_f[Intercept, ]) |>
  median_qi(intercept = b_Intercept + r_area_f, .width = c(.9)) |>
  mutate(area = Intercept) |> # the index column holds the area level
  left_join(mean_temps, by = "area") |>
  ggplot(aes(
    y = intercept, x = mean_temp, ymin = .lower, ymax = .upper,
    color = factor(area, order$area),
    fill = factor(area, order$area)
  )) +
  geom_smooth(
    method = "lm", inherit.aes = FALSE, linetype = 2,
    aes(mean_temp, intercept), alpha = 0.15, color = "grey"
  ) +
  geom_point(size = 2) +
  geom_errorbar(alpha = 0.3) +
  scale_color_manual(values = colors, name = "Site") +
  scale_fill_manual(values = colors, name = "Site") +
  labs(y = "Site-specific intercept", x = "Mean site temperature (°C)") +
  guides(
    fill = "none",
    color = "none"
  ) +
  theme(axis.title.y = ggtext::element_markdown())

p2

ggsave(paste0(home, "/figures/random_intercepts.pdf"), width = 12, height = 12, units = "cm")


# Check significance of slopes?!
get_variables(m4)
 [1] "b_Intercept"                       "b_temp_sc"                        
 [3] "b_temp_sq_sc"                      "sd_area_f__Intercept"             
 [5] "sd_area_f__temp_sc"                "sd_area_f__temp_sq_sc"            
 [7] "cor_area_f__Intercept__temp_sc"    "cor_area_f__Intercept__temp_sq_sc"
 [9] "cor_area_f__temp_sc__temp_sq_sc"   "sigma"                            
[11] "nu"                                "Intercept"                        
[13] "r_area_f[BS,Intercept]"            "r_area_f[BT,Intercept]"           
[15] "r_area_f[FB,Intercept]"            "r_area_f[FM,Intercept]"           
[17] "r_area_f[HO,Intercept]"            "r_area_f[JM,Intercept]"           
[19] "r_area_f[MU,Intercept]"            "r_area_f[RA,Intercept]"           
[21] "r_area_f[SI_EK,Intercept]"         "r_area_f[SI_HA,Intercept]"        
[23] "r_area_f[BS,temp_sc]"              "r_area_f[BT,temp_sc]"             
[25] "r_area_f[FB,temp_sc]"              "r_area_f[FM,temp_sc]"             
[27] "r_area_f[HO,temp_sc]"              "r_area_f[JM,temp_sc]"             
[29] "r_area_f[MU,temp_sc]"              "r_area_f[RA,temp_sc]"             
[31] "r_area_f[SI_EK,temp_sc]"           "r_area_f[SI_HA,temp_sc]"          
[33] "r_area_f[BS,temp_sq_sc]"           "r_area_f[BT,temp_sq_sc]"          
[35] "r_area_f[FB,temp_sq_sc]"           "r_area_f[FM,temp_sq_sc]"          
[37] "r_area_f[HO,temp_sq_sc]"           "r_area_f[JM,temp_sq_sc]"          
[39] "r_area_f[MU,temp_sq_sc]"           "r_area_f[RA,temp_sq_sc]"          
[41] "r_area_f[SI_EK,temp_sq_sc]"        "r_area_f[SI_HA,temp_sq_sc]"       
[43] "lprior"                            "lp__"                             
[45] "z_1[1,1]"                          "z_1[2,1]"                         
[47] "z_1[3,1]"                          "z_1[1,2]"                         
[49] "z_1[2,2]"                          "z_1[3,2]"                         
[51] "z_1[1,3]"                          "z_1[2,3]"                         
[53] "z_1[3,3]"                          "z_1[1,4]"                         
[55] "z_1[2,4]"                          "z_1[3,4]"                         
[57] "z_1[1,5]"                          "z_1[2,5]"                         
[59] "z_1[3,5]"                          "z_1[1,6]"                         
[61] "z_1[2,6]"                          "z_1[3,6]"                         
[63] "z_1[1,7]"                          "z_1[2,7]"                         
[65] "z_1[3,7]"                          "z_1[1,8]"                         
[67] "z_1[2,8]"                          "z_1[3,8]"                         
[69] "z_1[1,9]"                          "z_1[2,9]"                         
[71] "z_1[3,9]"                          "z_1[1,10]"                        
[73] "z_1[2,10]"                         "z_1[3,10]"                        
[75] "L_1[1,1]"                          "L_1[2,1]"                         
[77] "L_1[3,1]"                          "L_1[1,2]"                         
[79] "L_1[2,2]"                          "L_1[3,2]"                         
[81] "L_1[1,3]"                          "L_1[2,3]"                         
[83] "L_1[3,3]"                          "accept_stat__"                    
[85] "stepsize__"                        "treedepth__"                      
[87] "n_leapfrog__"                      "divergent__"                      
[89] "energy__"                         
# Site-specific intercepts and slopes for testing whether they trend with
# mean site temperature (lm fits below).
# NOTE(review): interval widths differ (.95 for intercepts here vs .9 for
# slopes and for the plot above) — confirm this is intentional. See also the
# index-syntax caveat noted at the p2 computation.
t_inter <- m4 |>
  spread_draws(b_Intercept, r_area_f[Intercept,]) |>
  median_qi(intercept = b_Intercept + r_area_f, .width = c(.95)) |>
  mutate(area = Intercept) |>
  left_join(mean_temps, by = "area")

t_slope <- m4 |>
  spread_draws(b_temp_sc, r_area_f[temp_sc,]) |>
  median_qi(slope = b_temp_sc + r_area_f, .width = c(.9)) |>
  mutate(area = temp_sc) |>
  left_join(mean_temps, by = "area")

tidy(lm(intercept ~ mean_temp, data = t_inter))
# A tibble: 2 × 5
  term        estimate std.error statistic  p.value
  <chr>          <dbl>     <dbl>     <dbl>    <dbl>
1 (Intercept)   45.4       1.06      43.0  9.52e-11
2 mean_temp     -0.148     0.115     -1.29 2.35e- 1
tidy(lm(slope ~ mean_temp, data = t_slope))
# A tibble: 2 × 5
  term        estimate std.error statistic p.value
  <chr>          <dbl>     <dbl>     <dbl>   <dbl>
1 (Intercept)    4.31      1.10       3.93 0.00437
2 mean_temp     -0.144     0.120     -1.20 0.265  

Plot conceptual model

get_variables(m4)
 [1] "b_Intercept"                       "b_temp_sc"                        
 [3] "b_temp_sq_sc"                      "sd_area_f__Intercept"             
 [5] "sd_area_f__temp_sc"                "sd_area_f__temp_sq_sc"            
 [7] "cor_area_f__Intercept__temp_sc"    "cor_area_f__Intercept__temp_sq_sc"
 [9] "cor_area_f__temp_sc__temp_sq_sc"   "sigma"                            
[11] "nu"                                "Intercept"                        
[13] "r_area_f[BS,Intercept]"            "r_area_f[BT,Intercept]"           
[15] "r_area_f[FB,Intercept]"            "r_area_f[FM,Intercept]"           
[17] "r_area_f[HO,Intercept]"            "r_area_f[JM,Intercept]"           
[19] "r_area_f[MU,Intercept]"            "r_area_f[RA,Intercept]"           
[21] "r_area_f[SI_EK,Intercept]"         "r_area_f[SI_HA,Intercept]"        
[23] "r_area_f[BS,temp_sc]"              "r_area_f[BT,temp_sc]"             
[25] "r_area_f[FB,temp_sc]"              "r_area_f[FM,temp_sc]"             
[27] "r_area_f[HO,temp_sc]"              "r_area_f[JM,temp_sc]"             
[29] "r_area_f[MU,temp_sc]"              "r_area_f[RA,temp_sc]"             
[31] "r_area_f[SI_EK,temp_sc]"           "r_area_f[SI_HA,temp_sc]"          
[33] "r_area_f[BS,temp_sq_sc]"           "r_area_f[BT,temp_sq_sc]"          
[35] "r_area_f[FB,temp_sq_sc]"           "r_area_f[FM,temp_sq_sc]"          
[37] "r_area_f[HO,temp_sq_sc]"           "r_area_f[JM,temp_sq_sc]"          
[39] "r_area_f[MU,temp_sq_sc]"           "r_area_f[RA,temp_sq_sc]"          
[41] "r_area_f[SI_EK,temp_sq_sc]"        "r_area_f[SI_HA,temp_sq_sc]"       
[43] "lprior"                            "lp__"                             
[45] "z_1[1,1]"                          "z_1[2,1]"                         
[47] "z_1[3,1]"                          "z_1[1,2]"                         
[49] "z_1[2,2]"                          "z_1[3,2]"                         
[51] "z_1[1,3]"                          "z_1[2,3]"                         
[53] "z_1[3,3]"                          "z_1[1,4]"                         
[55] "z_1[2,4]"                          "z_1[3,4]"                         
[57] "z_1[1,5]"                          "z_1[2,5]"                         
[59] "z_1[3,5]"                          "z_1[1,6]"                         
[61] "z_1[2,6]"                          "z_1[3,6]"                         
[63] "z_1[1,7]"                          "z_1[2,7]"                         
[65] "z_1[3,7]"                          "z_1[1,8]"                         
[67] "z_1[2,8]"                          "z_1[3,8]"                         
[69] "z_1[1,9]"                          "z_1[2,9]"                         
[71] "z_1[3,9]"                          "z_1[1,10]"                        
[73] "z_1[2,10]"                         "z_1[3,10]"                        
[75] "L_1[1,1]"                          "L_1[2,1]"                         
[77] "L_1[3,1]"                          "L_1[1,2]"                         
[79] "L_1[2,2]"                          "L_1[3,2]"                         
[81] "L_1[1,3]"                          "L_1[2,3]"                         
[83] "L_1[3,3]"                          "accept_stat__"                    
[85] "stepsize__"                        "treedepth__"                      
[87] "n_leapfrog__"                      "divergent__"                      
[89] "energy__"                         
# Posterior-mean coefficients for site FM: population-level effect plus the
# FM random offset. mean(a + b) is computed directly inside summarise()
# instead of materialising intermediate columns first — the result is
# arithmetically identical.
par <- m4 |>
  spread_draws(
    b_Intercept, b_temp_sc, b_temp_sq_sc,
    `r_area_f[FM,Intercept]`, `r_area_f[FM,temp_sc]`, `r_area_f[FM,temp_sq_sc]`
  ) |>
  ungroup() |>
  summarise(
    intercept = mean(b_Intercept + `r_area_f[FM,Intercept]`),
    b1 = mean(b_temp_sc + `r_area_f[FM,temp_sc]`),
    b2 = mean(b_temp_sq_sc + `r_area_f[FM,temp_sq_sc]`)
  )
ungroup: no grouping variables
mutate: new variable 'intercept' (double) with 7,994 unique values and 0% NA
        new variable 'b1' (double) with 7,994 unique values and 0% NA
        new variable 'b2' (double) with 7,994 unique values and 0% NA
summarise: now one row and 3 columns, ungrouped
# No adaptation: all three populations share one thermal reaction norm
no_adapt_1 <- tibble(temp = seq(5, 15, length.out = 500)) |>
  mutate(
    # FIX: original read (temp - mean(VBG$temp) / sd(VBG$temp)); by operator
    # precedence that is temp - (mean/sd), not the intended z-score.
    temp_sc = (temp - mean(VBG$temp)) / sd(VBG$temp),
    temp_sc_sq = temp_sc * temp_sc
  ) |>
  mutate(
    # Illustrative quadratic curve with hand-tuned multipliers.
    # NOTE(review): multipliers were tuned against the old (buggy) scaling —
    # re-check the curve shape after this fix.
    growth_rate = par$intercept + temp_sc * (par$b1 * 3) + temp_sc_sq * (par$b2 * 0.08),
    pop = "Cold"
  )
mutate: new variable 'temp_sc' (double) with 500 unique values and 0% NA
        new variable 'temp_sc_sq' (double) with 500 unique values and 0% NA
mutate: new variable 'growth_rate' (double) with 500 unique values and 0% NA
        new variable 'pop' (character) with one unique value and 0% NA
# Medium site: same reaction norm, shifted temperature range, +0.5 offset
no_adapt_2 <- tibble(temp = seq(10, 20, length.out = 500)) |>
  mutate(
    # FIX: operator precedence — intended z-score is (temp - mean) / sd
    temp_sc = (temp - mean(VBG$temp)) / sd(VBG$temp),
    temp_sc_sq = temp_sc * temp_sc
  ) |>
  mutate(
    growth_rate = par$intercept + temp_sc * (par$b1 * 3) + temp_sc_sq * (par$b2 * 0.08),
    pop = "Medium",
    growth_rate = growth_rate + 0.5 # vertical offset for illustration
  )
mutate: new variable 'temp_sc' (double) with 500 unique values and 0% NA
        new variable 'temp_sc_sq' (double) with 500 unique values and 0% NA
mutate: new variable 'growth_rate' (double) with 500 unique values and 0% NA
        new variable 'pop' (character) with one unique value and 0% NA
# Warm site: same reaction norm, warmest temperature range, +1 offset
no_adapt_3 <- tibble(temp = seq(15, 25, length.out = 500)) |>
  mutate(
    # FIX: operator precedence — intended z-score is (temp - mean) / sd
    temp_sc = (temp - mean(VBG$temp)) / sd(VBG$temp),
    temp_sc_sq = temp_sc * temp_sc
  ) |>
  mutate(
    growth_rate = par$intercept + temp_sc * (par$b1 * 3) + temp_sc_sq * (par$b2 * 0.08),
    pop = "Warm",
    growth_rate = growth_rate + 1 # vertical offset for illustration
  )
mutate: new variable 'temp_sc' (double) with 500 unique values and 0% NA
        new variable 'temp_sc_sq' (double) with 500 unique values and 0% NA
mutate: new variable 'growth_rate' (double) with 500 unique values and 0% NA
        new variable 'pop' (character) with one unique value and 0% NA
# Stack the three populations and draw the "no adaptation" panel; this is
# the only panel that carries the shared legend (placed inside the plot).
no_adapt <- bind_rows(no_adapt_1, no_adapt_2, no_adapt_3)

p1 <- ggplot(no_adapt, aes(temp, growth_rate, color = pop)) +
  geom_line(linewidth = 1.2, alpha = 0.9) +
  labs(
    title = "No adaptation",
    x = "Temperature →", y = "Growth rate →", color = "Site"
  ) +
  guides(color = guide_legend(position = "inside", title.position = "top", title.hjust = 0.5)) +
  theme(
    axis.text.x = element_blank(),
    axis.text.y = element_blank(),
    axis.ticks.x = element_blank(),
    axis.ticks.y = element_blank(),
    axis.title.x = element_blank(),
    legend.direction = "horizontal",
    legend.position.inside = c(0.5, 0.12)
  )


# Local adaptation with maximum growth increasing with temperature
adapt_1 <- tibble(temp = seq(5, 15, length.out = 500)) |>
  mutate(
    # FIX: operator precedence — intended z-score is (temp - mean) / sd
    temp_sc = (temp - mean(VBG$temp)) / sd(VBG$temp),
    temp_sc_sq = temp_sc * temp_sc
  ) |>
  mutate(
    # Narrower, steeper curve (larger multipliers) than the no-adaptation case
    growth_rate = par$intercept + temp_sc * (par$b1 * 4) + temp_sc_sq * (par$b2 * 0.18),
    pop = "Cold"
  )
mutate: new variable 'temp_sc' (double) with 500 unique values and 0% NA
        new variable 'temp_sc_sq' (double) with 500 unique values and 0% NA
mutate: new variable 'growth_rate' (double) with 500 unique values and 0% NA
        new variable 'pop' (character) with one unique value and 0% NA
adapt_2 <- tibble(temp = seq(10, 20, length.out = 500)) |>
  mutate(
    # FIX: operator precedence — intended z-score is (temp - mean) / sd
    temp_sc = (temp - mean(VBG$temp)) / sd(VBG$temp),
    temp_sc_sq = temp_sc * temp_sc
  ) |>
  mutate(
    growth_rate = par$intercept * 0.976 + temp_sc * (par$b1 * 3) + temp_sc_sq * (par$b2 * 0.08),
    pop = "Medium",
    growth_rate = growth_rate + 1 # higher maximum growth at the warmer site
  )
mutate: new variable 'temp_sc' (double) with 500 unique values and 0% NA
        new variable 'temp_sc_sq' (double) with 500 unique values and 0% NA
mutate: new variable 'growth_rate' (double) with 500 unique values and 0% NA
        new variable 'pop' (character) with one unique value and 0% NA
adapt_3 <- tibble(temp = seq(15, 25, length.out = 500)) |>
  mutate(
    # FIX: operator precedence — intended z-score is (temp - mean) / sd
    temp_sc = (temp - mean(VBG$temp)) / sd(VBG$temp),
    temp_sc_sq = temp_sc * temp_sc
  ) |>
  mutate(
    growth_rate = par$intercept * 0.725 + temp_sc * (par$b1 * 5) + temp_sc_sq * (par$b2 * 0.11),
    pop = "Warm",
    growth_rate = growth_rate + 2 # highest maximum growth at the warmest site
  )
  )
mutate: new variable 'temp_sc' (double) with 500 unique values and 0% NA
        new variable 'temp_sc_sq' (double) with 500 unique values and 0% NA
mutate: new variable 'growth_rate' (double) with 500 unique values and 0% NA
        new variable 'pop' (character) with one unique value and 0% NA
# Stack populations and draw the "temperature-dependent maximum growth"
# panel (legend suppressed; it lives in p1)
adapt <- bind_rows(adapt_1, adapt_2, adapt_3)

p2 <- ggplot(adapt, aes(temp, growth_rate, color = pop)) +
  geom_line(linewidth = 1.2, alpha = 0.8) +
  labs(
    title = "Local adaptation",
    subtitle = "Temperature-dependent maximum growth",
    x = "Temperature →", y = "Growth rate →"
  ) +
  guides(color = "none") +
  theme(
    axis.text.x = element_blank(),
    axis.text.y = element_blank(),
    axis.ticks.x = element_blank(),
    axis.ticks.y = element_blank(),
    axis.title.y = element_blank()
  )


# Local adaptation with constant maximum growth across temperature
adaptb_1 <- tibble(temp = seq(5, 13, length.out = 500)) |>
  mutate(
    # FIX: operator precedence — intended z-score is (temp - mean) / sd
    temp_sc = (temp - mean(VBG$temp)) / sd(VBG$temp),
    temp_sc_sq = temp_sc * temp_sc
  ) |>
  mutate(
    growth_rate = par$intercept + temp_sc * (par$b1 * 4) + temp_sc_sq * (par$b2 * 0.18),
    pop = "Cold"
  )
mutate: new variable 'temp_sc' (double) with 500 unique values and 0% NA
        new variable 'temp_sc_sq' (double) with 500 unique values and 0% NA
mutate: new variable 'growth_rate' (double) with 500 unique values and 0% NA
        new variable 'pop' (character) with one unique value and 0% NA
adaptb_2 <- tibble(temp = seq(5, 13, length.out = 500)) |>
  mutate(
    # FIX: operator precedence — intended z-score is (temp - mean) / sd
    temp_sc = (temp - mean(VBG$temp)) / sd(VBG$temp),
    temp_sc_sq = temp_sc * temp_sc
  ) |>
  mutate(
    # Same curve as adaptb_1 (removed the no-op growth_rate = growth_rate)
    growth_rate = par$intercept + temp_sc * (par$b1 * 4) + temp_sc_sq * (par$b2 * 0.18),
    pop = "Medium"
  ) |>
  mutate(temp = temp + 5) # shift along temperature axis; peak height unchanged
mutate: new variable 'temp_sc' (double) with 500 unique values and 0% NA
        new variable 'temp_sc_sq' (double) with 500 unique values and 0% NA
mutate: new variable 'growth_rate' (double) with 500 unique values and 0% NA
        new variable 'pop' (character) with one unique value and 0% NA
mutate: changed 500 values (100%) of 'temp' (0 new NA)
adaptb_3 <- tibble(temp = seq(5, 13, length.out = 500)) |>
  mutate(
    # FIX: operator precedence — intended z-score is (temp - mean) / sd
    temp_sc = (temp - mean(VBG$temp)) / sd(VBG$temp),
    temp_sc_sq = temp_sc * temp_sc
  ) |>
  mutate(
    # Same curve as adaptb_1 (removed the no-op growth_rate = growth_rate)
    growth_rate = par$intercept + temp_sc * (par$b1 * 4) + temp_sc_sq * (par$b2 * 0.18),
    pop = "Warm"
  ) |>
  mutate(temp = temp + 10) # shift along temperature axis; peak height unchanged
mutate: new variable 'temp_sc' (double) with 500 unique values and 0% NA
        new variable 'temp_sc_sq' (double) with 500 unique values and 0% NA
mutate: new variable 'growth_rate' (double) with 500 unique values and 0% NA
        new variable 'pop' (character) with one unique value and 0% NA
mutate: changed 500 values (100%) of 'temp' (0 new NA)
# Stack populations and draw the "constant maximum growth" panel
adaptb <- bind_rows(adaptb_1, adaptb_2, adaptb_3)

p3 <- ggplot(adaptb, aes(temp, growth_rate, color = pop)) +
  geom_line(linewidth = 1.2, alpha = 0.8) +
  labs(
    title = "Local adaptation",
    subtitle = "Constant maximum growth",
    x = "Temperature →", y = "Growth rate →"
  ) +
  guides(color = "none") +
  theme(
    axis.text.x = element_blank(),
    axis.text.y = element_blank(),
    axis.ticks.x = element_blank(),
    axis.ticks.y = element_blank(),
    axis.title.x = element_blank(),
    axis.title.y = element_blank()
  )

# Assemble the three conceptual panels with shared text sizes and palette
# (& applies the modification to every panel)
(p1 + p2 + p3) &
  theme(
    axis.title = element_text(size = 10),
    plot.title = element_text(size = 10),
    plot.subtitle = element_text(size = 8.3)
  ) &
  scale_color_manual(values = colors[c(10, 5, 1)])

ggsave(paste0(home, "/figures/concept.pdf"), width = 19, height = 7.5, units = "cm", device = cairo_pdf)

Supplementary plot

# Plot prior vs posterior
# Refit of the growth-coefficient model with sample_prior = TRUE so that
# prior draws (prior_*) are stored alongside the posterior for the
# prior-vs-posterior comparison plot below.
m4p <- brm(A ~ temp_sc + temp_sq_sc + (1 + temp_sc + temp_sq_sc | area_f),
  data = dat,
  cores = 4,
  chains = 4,
  iter = 4000,
  seed = 99, # added for reproducibility, consistent with m4k/m4l below
  family = student,
  prior = set_prior("normal(0,5)", class = "b"),
  sample_prior = TRUE,
  save_pars = save_pars(all = TRUE),
  control = list(adapt_delta = 0.95) # FIX: removed trailing comma (passed an empty argument via ...)
)
Compiling Stan program...
Trying to compile a simple C file
Running /Library/Frameworks/R.framework/Resources/bin/R CMD SHLIB foo.c
using C compiler: ‘Apple clang version 16.0.0 (clang-1600.0.26.6)’
using SDK: ‘MacOSX15.2.sdk’
clang -arch arm64 -I"/Library/Frameworks/R.framework/Resources/include" -DNDEBUG   -I"/Library/Frameworks/R.framework/Versions/4.3-arm64/Resources/library/Rcpp/include/"  -I"/Library/Frameworks/R.framework/Versions/4.3-arm64/Resources/library/RcppEigen/include/"  -I"/Library/Frameworks/R.framework/Versions/4.3-arm64/Resources/library/RcppEigen/include/unsupported"  -I"/Library/Frameworks/R.framework/Versions/4.3-arm64/Resources/library/BH/include" -I"/Library/Frameworks/R.framework/Versions/4.3-arm64/Resources/library/StanHeaders/include/src/"  -I"/Library/Frameworks/R.framework/Versions/4.3-arm64/Resources/library/StanHeaders/include/"  -I"/Library/Frameworks/R.framework/Versions/4.3-arm64/Resources/library/RcppParallel/include/"  -I"/Library/Frameworks/R.framework/Versions/4.3-arm64/Resources/library/rstan/include" -DEIGEN_NO_DEBUG  -DBOOST_DISABLE_ASSERTS  -DBOOST_PENDING_INTEGER_LOG2_HPP  -DSTAN_THREADS  -DUSE_STANC3 -DSTRICT_R_HEADERS  -DBOOST_PHOENIX_NO_VARIADIC_EXPRESSION  -D_HAS_AUTO_PTR_ETC=0  -include '/Library/Frameworks/R.framework/Versions/4.3-arm64/Resources/library/StanHeaders/include/stan/math/prim/fun/Eigen.hpp'  -D_REENTRANT -DRCPP_PARALLEL_USE_TBB=1   -I/opt/R/arm64/include    -fPIC  -falign-functions=64 -Wall -g -O2  -c foo.c -o foo.o
In file included from <built-in>:1:
In file included from /Library/Frameworks/R.framework/Versions/4.3-arm64/Resources/library/StanHeaders/include/stan/math/prim/fun/Eigen.hpp:22:
In file included from /Library/Frameworks/R.framework/Versions/4.3-arm64/Resources/library/RcppEigen/include/Eigen/Dense:1:
In file included from /Library/Frameworks/R.framework/Versions/4.3-arm64/Resources/library/RcppEigen/include/Eigen/Core:19:
/Library/Frameworks/R.framework/Versions/4.3-arm64/Resources/library/RcppEigen/include/Eigen/src/Core/util/Macros.h:679:10: fatal error: 'cmath' file not found
  679 | #include <cmath>
      |          ^~~~~~~
1 error generated.
make: *** [foo.o] Error 1
Start sampling
# Show the priors actually used in the refit (user-set normal(0,5) on b)
prior_summary(m4p)
                   prior     class       coef  group resp dpar nlpar lb ub
             normal(0,5)         b                                        
             normal(0,5)         b    temp_sc                             
             normal(0,5)         b temp_sq_sc                             
 student_t(3, 43.4, 4.4) Intercept                                        
    lkj_corr_cholesky(1)         L                                        
    lkj_corr_cholesky(1)         L            area_f                      
           gamma(2, 0.1)        nu                                    1   
    student_t(3, 0, 4.4)        sd                                    0   
    student_t(3, 0, 4.4)        sd            area_f                  0   
    student_t(3, 0, 4.4)        sd  Intercept area_f                  0   
    student_t(3, 0, 4.4)        sd    temp_sc area_f                  0   
    student_t(3, 0, 4.4)        sd temp_sq_sc area_f                  0   
    student_t(3, 0, 4.4)     sigma                                    0   
       source
         user
 (vectorized)
 (vectorized)
      default
      default
 (vectorized)
      default
      default
 (vectorized)
 (vectorized)
 (vectorized)
 (vectorized)
      default
# List draw variables; prior_* entries appear because sample_prior = TRUE
get_variables(m4p)
 [1] "b_Intercept"                       "b_temp_sc"                        
 [3] "b_temp_sq_sc"                      "sd_area_f__Intercept"             
 [5] "sd_area_f__temp_sc"                "sd_area_f__temp_sq_sc"            
 [7] "cor_area_f__Intercept__temp_sc"    "cor_area_f__Intercept__temp_sq_sc"
 [9] "cor_area_f__temp_sc__temp_sq_sc"   "sigma"                            
[11] "nu"                                "Intercept"                        
[13] "r_area_f[BS,Intercept]"            "r_area_f[BT,Intercept]"           
[15] "r_area_f[FB,Intercept]"            "r_area_f[FM,Intercept]"           
[17] "r_area_f[HO,Intercept]"            "r_area_f[JM,Intercept]"           
[19] "r_area_f[MU,Intercept]"            "r_area_f[RA,Intercept]"           
[21] "r_area_f[SI_EK,Intercept]"         "r_area_f[SI_HA,Intercept]"        
[23] "r_area_f[BS,temp_sc]"              "r_area_f[BT,temp_sc]"             
[25] "r_area_f[FB,temp_sc]"              "r_area_f[FM,temp_sc]"             
[27] "r_area_f[HO,temp_sc]"              "r_area_f[JM,temp_sc]"             
[29] "r_area_f[MU,temp_sc]"              "r_area_f[RA,temp_sc]"             
[31] "r_area_f[SI_EK,temp_sc]"           "r_area_f[SI_HA,temp_sc]"          
[33] "r_area_f[BS,temp_sq_sc]"           "r_area_f[BT,temp_sq_sc]"          
[35] "r_area_f[FB,temp_sq_sc]"           "r_area_f[FM,temp_sq_sc]"          
[37] "r_area_f[HO,temp_sq_sc]"           "r_area_f[JM,temp_sq_sc]"          
[39] "r_area_f[MU,temp_sq_sc]"           "r_area_f[RA,temp_sq_sc]"          
[41] "r_area_f[SI_EK,temp_sq_sc]"        "r_area_f[SI_HA,temp_sq_sc]"       
[43] "prior_Intercept"                   "prior_b"                          
[45] "prior_sigma"                       "prior_nu"                         
[47] "prior_sd_area_f"                   "prior_cor_area_f"                 
[49] "lprior"                            "lp__"                             
[51] "z_1[1,1]"                          "z_1[2,1]"                         
[53] "z_1[3,1]"                          "z_1[1,2]"                         
[55] "z_1[2,2]"                          "z_1[3,2]"                         
[57] "z_1[1,3]"                          "z_1[2,3]"                         
[59] "z_1[3,3]"                          "z_1[1,4]"                         
[61] "z_1[2,4]"                          "z_1[3,4]"                         
[63] "z_1[1,5]"                          "z_1[2,5]"                         
[65] "z_1[3,5]"                          "z_1[1,6]"                         
[67] "z_1[2,6]"                          "z_1[3,6]"                         
[69] "z_1[1,7]"                          "z_1[2,7]"                         
[71] "z_1[3,7]"                          "z_1[1,8]"                         
[73] "z_1[2,8]"                          "z_1[3,8]"                         
[75] "z_1[1,9]"                          "z_1[2,9]"                         
[77] "z_1[3,9]"                          "z_1[1,10]"                        
[79] "z_1[2,10]"                         "z_1[3,10]"                        
[81] "L_1[1,1]"                          "L_1[2,1]"                         
[83] "L_1[3,1]"                          "L_1[1,2]"                         
[85] "L_1[2,2]"                          "L_1[3,2]"                         
[87] "L_1[1,3]"                          "L_1[2,3]"                         
[89] "L_1[3,3]"                          "accept_stat__"                    
[91] "stepsize__"                        "treedepth__"                      
[93] "n_leapfrog__"                      "divergent__"                      
[95] "energy__"                         
# MCMC trace/density plots
# NOTE(review): this plots m4, not the just-refit m4p — confirm which model
# these diagnostics are meant for.
plot(m4)

# Long-format posterior draws for selected parameters (project helper
# get_post_draws(); adds a 'type' column per the tidylog output)
post_draws <- get_post_draws(
  model = m4p,
  params = c(
    "b_Intercept", "b_temp_sc", "sigma",
    "nu"
  )
)
Warning: Dropping 'draws_df' class as required metadata was removed.
pivot_longer: reorganized (b_Intercept, b_temp_sc, sigma, nu) into (parameter, value) [was 8000x4, now 32000x2]
mutate: new variable 'type' (character) with one unique value and 0% NA
# Long-format prior draws for the matching parameters (project helper
# get_prior_draws(); strips the 'prior_' prefix per the tidylog output)
prior_draws <- get_prior_draws(
  model = m4p,
  params = c(
    "prior_Intercept", "prior_b", "prior_sigma",
    "prior_nu"
  )
)
Warning: Dropping 'draws_df' class as required metadata was removed.
pivot_longer: reorganized (prior_Intercept, prior_b, prior_sigma, prior_nu) into (parameter, value) [was 8000x4, now 32000x2]
mutate: changed 32,000 values (100%) of 'parameter' (0 new NA)
mutate: new variable 'type' (character) with one unique value and 0% NA
# Harmonise parameter labels so prior and posterior draws can be faceted
# together: b_Intercept -> Intercept, b_temp_sc / b -> temp_sc
dist <- bind_rows(
  post_draws |>
    mutate(parameter = case_when(
      parameter == "b_Intercept" ~ "Intercept",
      parameter == "b_temp_sc" ~ "temp_sc",
      .default = parameter
    )),
  prior_draws |>
    mutate(parameter = if_else(parameter == "b", "temp_sc", parameter))
)
mutate: changed 16,000 values (50%) of 'parameter' (0 new NA)
mutate: changed 8,000 values (25%) of 'parameter' (0 new NA)
# Overlay prior and posterior densities per parameter (project helper)
# NOTE(review): the deprecation warning about numeric legend.position seems
# to originate inside plot_prior_post() — this call already uses
# legend.position.inside; check the helper.
plot_prior_post(dist, type) +
  theme(legend.position.inside = c(0.93, 0.97)) +
  guides(fill = guide_legend(position = "inside")) +
  labs(x = "Value", y = "Density") +
  facet_wrap(~parameter, scales = "free")
Warning: A numeric `legend.position` argument in `theme()` was deprecated in ggplot2
3.5.0.
ℹ Please use the `legend.position.inside` argument of `theme()` instead.

# Save the prior-vs-posterior comparison figure
ggsave(paste0(home, "/figures/supp/brms_prior.pdf"), width = 17, height = 17, units = "cm")

# Posterior predictive check for m4p
# FIX: element_markdown() must target axis.title.x — the markdown "*A*" is
# in the axis title set via labs(x = ...), not in the tick labels
# (cf. the k/Linf pp-check below, which uses axis.title.x).
pp_check(m4p) +
  theme(legend.position.inside = c(0.8, 0.8),
        axis.title.x = element_markdown()) +
  guides(color = guide_legend(position = "inside")) +
  labs(x = "Growth coefficient (*A*)")
Using 10 posterior draws for ppc type 'dens_overlay' by default.

# Save the posterior predictive check figure
ggsave(paste0(home, "/figures/supp/pp_check.pdf"), width = 11, height = 11, units = "cm")

Fit models of K and Linf to temperature

# Quadratic effect of temperature on k with area-level random intercepts and
# slopes; Student-t likelihood; adapt_delta raised to 0.999
m4k <- brm(k ~ temp_sc + temp_sq_sc + (1 + temp_sc + temp_sq_sc | area_f),
  data = dat,
  cores = 4,
  chains = 4,
  iter = 4000,
  seed = 99,
  family = student,
  prior = set_prior("normal(0,5)", class = "b"),
  save_pars = save_pars(all = TRUE),
  control = list(adapt_delta = 0.999) # FIX: removed trailing comma (passed an empty argument via ...)
)
Compiling Stan program...
Trying to compile a simple C file
Running /Library/Frameworks/R.framework/Resources/bin/R CMD SHLIB foo.c
using C compiler: ‘Apple clang version 16.0.0 (clang-1600.0.26.6)’
using SDK: ‘MacOSX15.2.sdk’
clang -arch arm64 -I"/Library/Frameworks/R.framework/Resources/include" -DNDEBUG   -I"/Library/Frameworks/R.framework/Versions/4.3-arm64/Resources/library/Rcpp/include/"  -I"/Library/Frameworks/R.framework/Versions/4.3-arm64/Resources/library/RcppEigen/include/"  -I"/Library/Frameworks/R.framework/Versions/4.3-arm64/Resources/library/RcppEigen/include/unsupported"  -I"/Library/Frameworks/R.framework/Versions/4.3-arm64/Resources/library/BH/include" -I"/Library/Frameworks/R.framework/Versions/4.3-arm64/Resources/library/StanHeaders/include/src/"  -I"/Library/Frameworks/R.framework/Versions/4.3-arm64/Resources/library/StanHeaders/include/"  -I"/Library/Frameworks/R.framework/Versions/4.3-arm64/Resources/library/RcppParallel/include/"  -I"/Library/Frameworks/R.framework/Versions/4.3-arm64/Resources/library/rstan/include" -DEIGEN_NO_DEBUG  -DBOOST_DISABLE_ASSERTS  -DBOOST_PENDING_INTEGER_LOG2_HPP  -DSTAN_THREADS  -DUSE_STANC3 -DSTRICT_R_HEADERS  -DBOOST_PHOENIX_NO_VARIADIC_EXPRESSION  -D_HAS_AUTO_PTR_ETC=0  -include '/Library/Frameworks/R.framework/Versions/4.3-arm64/Resources/library/StanHeaders/include/stan/math/prim/fun/Eigen.hpp'  -D_REENTRANT -DRCPP_PARALLEL_USE_TBB=1   -I/opt/R/arm64/include    -fPIC  -falign-functions=64 -Wall -g -O2  -c foo.c -o foo.o
In file included from <built-in>:1:
In file included from /Library/Frameworks/R.framework/Versions/4.3-arm64/Resources/library/StanHeaders/include/stan/math/prim/fun/Eigen.hpp:22:
In file included from /Library/Frameworks/R.framework/Versions/4.3-arm64/Resources/library/RcppEigen/include/Eigen/Dense:1:
In file included from /Library/Frameworks/R.framework/Versions/4.3-arm64/Resources/library/RcppEigen/include/Eigen/Core:19:
/Library/Frameworks/R.framework/Versions/4.3-arm64/Resources/library/RcppEigen/include/Eigen/src/Core/util/Macros.h:679:10: fatal error: 'cmath' file not found
  679 | #include <cmath>
      |          ^~~~~~~
1 error generated.
make: *** [foo.o] Error 1
Start sampling
# Priors used for the k model
prior_summary(m4k)
                  prior     class       coef  group resp dpar nlpar lb ub
            normal(0,5)         b                                        
            normal(0,5)         b    temp_sc                             
            normal(0,5)         b temp_sq_sc                             
 student_t(3, 0.2, 2.5) Intercept                                        
   lkj_corr_cholesky(1)         L                                        
   lkj_corr_cholesky(1)         L            area_f                      
          gamma(2, 0.1)        nu                                    1   
   student_t(3, 0, 2.5)        sd                                    0   
   student_t(3, 0, 2.5)        sd            area_f                  0   
   student_t(3, 0, 2.5)        sd  Intercept area_f                  0   
   student_t(3, 0, 2.5)        sd    temp_sc area_f                  0   
   student_t(3, 0, 2.5)        sd temp_sq_sc area_f                  0   
   student_t(3, 0, 2.5)     sigma                                    0   
       source
         user
 (vectorized)
 (vectorized)
      default
      default
 (vectorized)
      default
      default
 (vectorized)
 (vectorized)
 (vectorized)
 (vectorized)
      default
# Same quadratic temperature model for Linf: area-level random intercepts
# and slopes, Student-t likelihood (family = student), normal(0,5) slopes
m4l <- brm(
  linf ~ temp_sc + temp_sq_sc + (1 + temp_sc + temp_sq_sc | area_f),
  data = dat,
  family = student,
  prior = set_prior("normal(0,5)", class = "b"),
  chains = 4,
  cores = 4,
  iter = 4000,
  seed = 99,
  save_pars = save_pars(all = TRUE)
)
Compiling Stan program...
Trying to compile a simple C file
Running /Library/Frameworks/R.framework/Resources/bin/R CMD SHLIB foo.c
using C compiler: ‘Apple clang version 16.0.0 (clang-1600.0.26.6)’
using SDK: ‘MacOSX15.2.sdk’
clang -arch arm64 -I"/Library/Frameworks/R.framework/Resources/include" -DNDEBUG   -I"/Library/Frameworks/R.framework/Versions/4.3-arm64/Resources/library/Rcpp/include/"  -I"/Library/Frameworks/R.framework/Versions/4.3-arm64/Resources/library/RcppEigen/include/"  -I"/Library/Frameworks/R.framework/Versions/4.3-arm64/Resources/library/RcppEigen/include/unsupported"  -I"/Library/Frameworks/R.framework/Versions/4.3-arm64/Resources/library/BH/include" -I"/Library/Frameworks/R.framework/Versions/4.3-arm64/Resources/library/StanHeaders/include/src/"  -I"/Library/Frameworks/R.framework/Versions/4.3-arm64/Resources/library/StanHeaders/include/"  -I"/Library/Frameworks/R.framework/Versions/4.3-arm64/Resources/library/RcppParallel/include/"  -I"/Library/Frameworks/R.framework/Versions/4.3-arm64/Resources/library/rstan/include" -DEIGEN_NO_DEBUG  -DBOOST_DISABLE_ASSERTS  -DBOOST_PENDING_INTEGER_LOG2_HPP  -DSTAN_THREADS  -DUSE_STANC3 -DSTRICT_R_HEADERS  -DBOOST_PHOENIX_NO_VARIADIC_EXPRESSION  -D_HAS_AUTO_PTR_ETC=0  -include '/Library/Frameworks/R.framework/Versions/4.3-arm64/Resources/library/StanHeaders/include/stan/math/prim/fun/Eigen.hpp'  -D_REENTRANT -DRCPP_PARALLEL_USE_TBB=1   -I/opt/R/arm64/include    -fPIC  -falign-functions=64 -Wall -g -O2  -c foo.c -o foo.o
In file included from <built-in>:1:
In file included from /Library/Frameworks/R.framework/Versions/4.3-arm64/Resources/library/StanHeaders/include/stan/math/prim/fun/Eigen.hpp:22:
In file included from /Library/Frameworks/R.framework/Versions/4.3-arm64/Resources/library/RcppEigen/include/Eigen/Dense:1:
In file included from /Library/Frameworks/R.framework/Versions/4.3-arm64/Resources/library/RcppEigen/include/Eigen/Core:19:
/Library/Frameworks/R.framework/Versions/4.3-arm64/Resources/library/RcppEigen/include/Eigen/src/Core/util/Macros.h:679:10: fatal error: 'cmath' file not found
  679 | #include <cmath>
      |          ^~~~~~~
1 error generated.
make: *** [foo.o] Error 1
Start sampling
# Priors used for the Linf model
prior_summary(m4l)
                   prior     class       coef  group resp dpar nlpar lb ub
             normal(0,5)         b                                        
             normal(0,5)         b    temp_sc                             
             normal(0,5)         b temp_sq_sc                             
 student_t(3, 340.9, 80) Intercept                                        
    lkj_corr_cholesky(1)         L                                        
    lkj_corr_cholesky(1)         L            area_f                      
           gamma(2, 0.1)        nu                                    1   
     student_t(3, 0, 80)        sd                                    0   
     student_t(3, 0, 80)        sd            area_f                  0   
     student_t(3, 0, 80)        sd  Intercept area_f                  0   
     student_t(3, 0, 80)        sd    temp_sc area_f                  0   
     student_t(3, 0, 80)        sd temp_sq_sc area_f                  0   
     student_t(3, 0, 80)     sigma                                    0   
       source
         user
 (vectorized)
 (vectorized)
      default
      default
 (vectorized)
      default
      default
 (vectorized)
 (vectorized)
 (vectorized)
 (vectorized)
      default

Plot these fits

# Posterior predictive check for the k model (markdown axis title "*k*")
p1 <- pp_check(m4k) +
  labs(x = "*k*") +
  guides(color = guide_legend(position = "inside")) +
  theme(
    legend.position.inside = c(0.8, 0.8),
    axis.title.x = element_markdown()
  )
Using 10 posterior draws for ppc type 'dens_overlay' by default.
# Posterior predictive check for the Linf model, x-axis truncated to 0-1000 mm
p2 <- pp_check(m4l) +
  coord_cartesian(xlim = c(0, 1000)) + # replaces pp_check's coord (warning expected)
  labs(x = expression(paste(italic(L[infinity]), " [mm]"))) +
  guides(color = guide_legend(position = "inside")) +
  theme(legend.position.inside = c(0.8, 0.8))
Using 10 posterior draws for ppc type 'dens_overlay' by default.
Coordinate system already present. Adding new coordinate system, which will
replace the existing one.
# Combine the two pp-check panels side by side and save
p1 + p2

ggsave(paste0(home, "/figures/supp/k_linf_pp_check.pdf"), width = 17, height = 11, units = "cm")


# Conditional predictions for k: a 100-point temp_sc grid spanning each
# site's observed range, then expected-value draws from m4k
m4k_pred <- dat |>
  group_by(area) |>
  data_grid(temp_sc = seq_range(temp_sc, n = 100)) |>
  ungroup() |>
  mutate(
    area_f = as.factor(area),
    temp_sq_sc = temp_sc^2,
    # back-transform to the natural temperature scale for plotting
    temp = temp_sc * sd(VBG$temp) + mean(VBG$temp)
  ) |>
  add_epred_draws(m4k)
group_by: one grouping variable (area)
ungroup: no grouping variables
mutate: new variable 'area_f' (factor) with 10 unique values and 0% NA
        new variable 'temp_sq_sc' (double) with 999 unique values and 0% NA
        new variable 'temp' (double) with 999 unique values and 0% NA
# Same prediction grid for the Linf model
m4l_pred <- dat |>
  group_by(area) |>
  data_grid(temp_sc = seq_range(temp_sc, n = 100)) |>
  ungroup() |>
  mutate(
    area_f = as.factor(area),
    temp_sq_sc = temp_sc^2,
    # back-transform to the natural temperature scale for plotting
    temp = temp_sc * sd(VBG$temp) + mean(VBG$temp)
  ) |>
  add_epred_draws(m4l)
group_by: one grouping variable (area)
ungroup: no grouping variables
mutate: new variable 'area_f' (factor) with 10 unique values and 0% NA
        new variable 'temp_sq_sc' (double) with 999 unique values and 0% NA
        new variable 'temp' (double) with 999 unique values and 0% NA
# m4_kl <- bind_rows(m4k_pred, m4l_pred)

# Suffix SI_HA and BT with "*" in the legend labels
# NOTE(review): confirm what the asterisk denotes (presumably explained in
# the figure caption)
labels <- paste0(order$area, ifelse(order$area %in% c("SI_HA", "BT"), "*", ""))

# K: observed k per site vs mean site temperature, with 95% credible ribbon
# and posterior median line from m4k
# FIX: the original added guides() twice; ggplot2 lets the later call
# override the earlier one for the same aesthetics, so the first guides()
# (with override.aes = list(size = 1)) was dead code and is removed.
pk <- ggplot(dat, aes(temp, k, color = factor(area, order$area)), size = 0.6) +
  geom_point() +
  scale_color_manual(values = colors, name = "Site", labels = labels) +
  scale_fill_manual(values = colors, name = "Site", labels = labels) +
  theme(
    axis.title.y = ggtext::element_markdown(), # renders "*k*" as markdown
    legend.position = "bottom",
    legend.direction = "horizontal"
  ) +
  # 95% credible ribbon
  stat_lineribbon(
    data = m4k_pred,
    aes(temp,
      y = .epred,
      fill = factor(area, order$area)
    ),
    alpha = 0.1, .width = 0.95
  ) +
  # posterior median line (.width = 0 collapses the ribbon to a line)
  stat_lineribbon(
    data = m4k_pred,
    aes(temp,
      y = .epred,
      fill = factor(area, order$area)
    ),
    alpha = 1, .width = 0
  ) +
  guides(fill = "none", color = guide_legend(
    nrow = 1, reverse = TRUE,
    title.position = "top", title.hjust = 0.5,
    override.aes = list(fill = NA)
  )) +
  labs(x = "Mean site temperature [°C]", y = "*k*", color = "")

# Linf: observed asymptotic length per site vs mean site temperature, with
# 95% credible ribbon and posterior median line from m4l
pl <- ggplot(dat, aes(temp, linf, color = factor(area, order$area)), size = 0.6) +
  geom_point() +
  scale_color_manual(values = colors, name = "Site", labels = labels) +
  scale_fill_manual(values = colors, name = "Site", labels = labels) +
  theme(
    legend.direction = "horizontal",
    legend.position = "bottom"
  ) +
  # 95% credible ribbon
  stat_lineribbon(
    data = m4l_pred,
    aes(temp, y = .epred, fill = factor(area, order$area)),
    alpha = 0.1, .width = 0.95
  ) +
  # posterior median line (.width = 0 collapses the ribbon to a line)
  stat_lineribbon(
    data = m4l_pred,
    aes(temp, y = .epred, fill = factor(area, order$area)),
    alpha = 1, .width = 0
  ) +
  guides(fill = "none", color = guide_legend(
    nrow = 1, reverse = TRUE,
    title.position = "top", title.hjust = 0.5,
    override.aes = list(fill = NA)
  )) +
  labs(
    x = "Mean site temperature [°C]",
    y = expression(paste(italic(L[infinity]), " [mm]")),
    color = ""
  )

# Stack the k and Linf panels, collect axes and the shared legend
# FIX: plot_layout() has no `axis` argument — `axis = "collect"` partially
# matched `axis_titles`, not the intended `axes` (cf. the commented-out
# `axes = "collect"` used earlier in this script).
pk / pl +
  plot_layout(axes = "collect", guides = "collect") &
  theme(legend.position = "bottom")

ggsave(paste0(home, "/figures/supp/k_linf_conditional.pdf"), width = 17, height = 25, units = "cm")

Print global temperature slopes

# Global (population-level) temperature effects on k
summary(m4k)
 Family: student 
  Links: mu = identity; sigma = identity; nu = identity 
Formula: k ~ temp_sc + temp_sq_sc + (1 + temp_sc + temp_sq_sc | area_f) 
   Data: dat (Number of observations: 306) 
  Draws: 4 chains, each with iter = 4000; warmup = 2000; thin = 1;
         total post-warmup draws = 8000

Group-Level Effects: 
~area_f (Number of levels: 10) 
                          Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS
sd(Intercept)                 0.03      0.01     0.01     0.05 1.00     2720
sd(temp_sc)                   0.02      0.01     0.00     0.04 1.00     1642
sd(temp_sq_sc)                0.01      0.01     0.00     0.02 1.00     2501
cor(Intercept,temp_sc)        0.13      0.41    -0.68     0.85 1.00     4903
cor(Intercept,temp_sq_sc)     0.02      0.48    -0.87     0.86 1.00     6317
cor(temp_sc,temp_sq_sc)      -0.06      0.50    -0.90     0.85 1.00     5084
                          Tail_ESS
sd(Intercept)                 3936
sd(temp_sc)                   2010
sd(temp_sq_sc)                3494
cor(Intercept,temp_sc)        4846
cor(Intercept,temp_sq_sc)     5487
cor(temp_sc,temp_sq_sc)       5584

Population-Level Effects: 
           Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
Intercept      0.20      0.01     0.18     0.22 1.00     2502     3903
temp_sc        0.00      0.01    -0.02     0.02 1.00     3808     3944
temp_sq_sc    -0.01      0.01    -0.02     0.00 1.00     3846     3172

Family Specific Parameters: 
      Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
sigma     0.04      0.00     0.04     0.04 1.00     7474     5306
nu       24.49     13.08     8.01    58.22 1.00     7756     6374

Draws were sampled using sampling(NUTS). For each parameter, Bulk_ESS
and Tail_ESS are effective sample size measures, and Rhat is the potential
scale reduction factor on split chains (at convergence, Rhat = 1).
summary(m4l)
 Family: student 
  Links: mu = identity; sigma = identity; nu = identity 
Formula: linf ~ temp_sc + temp_sq_sc + (1 + temp_sc + temp_sq_sc | area_f) 
   Data: dat (Number of observations: 306) 
  Draws: 4 chains, each with iter = 4000; warmup = 2000; thin = 1;
         total post-warmup draws = 8000

Group-Level Effects: 
~area_f (Number of levels: 10) 
                          Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS
sd(Intercept)                29.10     12.87     8.90    59.55 1.00     2986
sd(temp_sc)                  52.76     18.30    24.18    95.99 1.00     3511
sd(temp_sq_sc)               14.99     10.99     0.76    41.72 1.00     2886
cor(Intercept,temp_sc)       -0.40      0.34    -0.92     0.37 1.00     3076
cor(Intercept,temp_sq_sc)    -0.09      0.48    -0.88     0.80 1.00     6450
cor(temp_sc,temp_sq_sc)       0.28      0.44    -0.68     0.93 1.00     6826
                          Tail_ESS
sd(Intercept)                 3021
sd(temp_sc)                   4616
sd(temp_sq_sc)                3475
cor(Intercept,temp_sc)        3753
cor(Intercept,temp_sq_sc)     5915
cor(temp_sc,temp_sq_sc)       5676

Population-Level Effects: 
           Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
Intercept    334.99     12.07   313.17   361.25 1.00     3510     3540
temp_sc        0.20      4.83    -9.35     9.77 1.00    10796     6087
temp_sq_sc     5.01      4.54    -4.31    13.66 1.00     6920     4724

Family Specific Parameters: 
      Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
sigma    48.59      4.52    40.19    57.93 1.00     6726     5563
nu        2.56      0.53     1.75     3.80 1.00     7141     5586

Draws were sampled using sampling(NUTS). For each parameter, Bulk_ESS
and Tail_ESS are effective sample size measures, and Rhat is the potential
scale reduction factor on split chains (at convergence, Rhat = 1).

Calculate VBGE fits for specific temperature curves. Since the global effects of temperature are flat, we have to do it with area-specific predictions. But which ones? Here I predict k and Linf for 2 temperatures: the mean in each area and that mean + 3°C. I then use those to calculate reference and warm VBGE curves. There is a tendency for an increase in size-at-age in warmer areas. We sort of see this already in the plots of Linf against temperature by area, where cool populations decline and warm populations increase their Linf.

# Area-specific parameters, then calculate growth curves from those

# Length-at-age is computed out to age 40, but the plots below restrict the
# x-axis to ages 0-12 via coord_cartesian() (approximate maximum observed
# age); at age 40 the curves have essentially plateaued at Linf, which we
# already have better plots for.
age_bc <- 0:40

# Predict k across areas: posterior median and 90% interval of the expected
# k per area at two temperatures — the area's mean temperature and that
# mean + 3°C.
m4k_pars <- dat |>
  group_by(area_f) |>
  data_grid(temp = mean(temp)) |>
  ungroup() |>
  mutate(temp2 = temp + 3) |>
  pivot_longer(c("temp2", "temp"), values_to = "temp") |>
  dplyr::select(-name) |>
  # Scale temperature exactly as in the fitted model (full-data mean/sd)
  mutate(
    temp_sc = (temp - mean(dat$temp)) / sd(dat$temp),
    temp_sq_sc = temp_sc * temp_sc
  ) |>
  add_epred_draws(m4k) |>
  rename(k = .epred) |>
  group_by(temp, area_f) |>
  # FIX: `probs` spelled out — the original used `prob =`, relying on
  # partial argument matching of quantile()'s `probs`. 0.05/0.95 span a
  # 90% interval around the median. `.groups = "drop_last"` makes the
  # (default) resulting grouping by `temp` explicit.
  summarise(
    k_median = quantile(k, probs = 0.5),
    k_lwr = quantile(k, probs = 0.05),
    k_upr = quantile(k, probs = 0.95),
    .groups = "drop_last"
  )
group_by: one grouping variable (area_f)
ungroup: no grouping variables
mutate: new variable 'temp2' (double) with 10 unique values and 0% NA
pivot_longer: reorganized (temp2) into (name) [was 10x3, now 20x3]
mutate: new variable 'temp_sc' (double) with 20 unique values and 0% NA
        new variable 'temp_sq_sc' (double) with 20 unique values and 0% NA
rename: renamed one variable (k)
group_by: 2 grouping variables (temp, area_f)
summarise: now 20 rows and 5 columns, one group variable remaining (temp)
# Predict Linf across areas: posterior median and 90% interval of the
# expected Linf per area at the area's mean temperature and that mean + 3°C
# (same grid construction as for k above).
m4l_pars <- dat |>
  group_by(area_f) |>
  data_grid(temp = mean(temp)) |>
  ungroup() |>
  mutate(temp2 = temp + 3) |>
  pivot_longer(c("temp2", "temp"), values_to = "temp") |>
  dplyr::select(-name) |>
  # Scale temperature exactly as in the fitted model (full-data mean/sd)
  mutate(
    temp_sc = (temp - mean(dat$temp)) / sd(dat$temp),
    temp_sq_sc = temp_sc * temp_sc
  ) |>
  add_epred_draws(m4l) |>
  rename(linf = .epred) |>
  group_by(temp, area_f) |>
  # FIX: `probs` spelled out — the original used `prob =`, relying on
  # partial argument matching of quantile()'s `probs`. `.groups` makes the
  # (default) resulting grouping by `temp` explicit.
  summarise(
    linf_median = quantile(linf, probs = 0.5),
    linf_lwr = quantile(linf, probs = 0.05),
    linf_upr = quantile(linf, probs = 0.95),
    .groups = "drop_last"
  )
group_by: one grouping variable (area_f)
ungroup: no grouping variables
mutate: new variable 'temp2' (double) with 10 unique values and 0% NA
pivot_longer: reorganized (temp2) into (name) [was 10x3, now 20x3]
mutate: new variable 'temp_sc' (double) with 20 unique values and 0% NA
        new variable 'temp_sq_sc' (double) with 20 unique values and 0% NA
rename: renamed one variable (linf)
group_by: 2 grouping variables (temp, area_f)
summarise: now 20 rows and 5 columns, one group variable remaining (temp)
# Combine the k and Linf posterior summaries by area and temperature
est <- m4k_pars |>
  left_join(m4l_pars, by = c("area_f", "temp"))
left_join: added 3 columns (linf_median, linf_lwr, linf_upr)
           > rows only in x    0
           > rows only in y  ( 0)
           > matched rows     20
           >                 ====
           > rows total       20
# Expand by age and calculate length-at-age via the VBGE with t0 = 0:
# L(a) = Linf * (1 - exp(-k * a)).
# NOTE(review): the bounds multiply the marginal 5%/95% quantiles of Linf
# and k, which is not a joint posterior interval for length-at-age (it
# ignores the k-Linf correlation) — confirm this approximation is intended,
# or propagate the paired posterior draws instead.
# NOTE(review): the warm-scenario label says "Mean + 2°C", but the
# prediction grid uses temp + 3 (see `temp2` above) — looks off by one
# degree. The string is also the filter key in the warm/ref data frames
# below, so all occurrences must be changed together if corrected.
est_sum <- est |>
  crossing(age_bc) |>
  mutate(
    length_mm = linf_median * (1 - exp(-k_median * age_bc)),
    length_mm_lwr = linf_lwr * (1 - exp(-k_lwr * age_bc)),
    length_mm_upr = linf_upr * (1 - exp(-k_upr * age_bc))
  ) |>
  group_by(area_f) |>
  # min(temp) within each area is the area's mean temperature (the other
  # grid value is that mean + 3)
  mutate(temp_cat = ifelse(temp == min(temp), "Mean °C", "Mean + 2°C"))
mutate: new variable 'length_mm' (double) with 801 unique values and 0% NA
        new variable 'length_mm_lwr' (double) with 801 unique values and 0% NA
        new variable 'length_mm_upr' (double) with 801 unique values and 0% NA
group_by: one grouping variable (area_f)
mutate (grouped): new variable 'temp_cat' (character) with 2 unique values and 0% NA
# Two-colour palette for the temperature scenarios (positions 6 and 2 of
# the "Paired" palette — same pair as rev(...[c(2, 6)]))
pal <- brewer.pal(n = 6, name = "Paired")[c(6, 2)]

# Predicted length-at-age per area under the reference and warmed
# temperature scenarios, with the (approximate) 90% bands as ribbons
est_sum |>
  ggplot(aes(x = age_bc, y = length_mm, color = temp_cat)) +
  geom_ribbon(
    aes(x = age_bc, ymin = length_mm_lwr, ymax = length_mm_upr, fill = temp_cat),
    color = NA, alpha = 0.3
  ) +
  geom_line() +
  coord_cartesian(xlim = c(0, 12)) +
  scale_fill_manual(values = pal, name = "Temperature") +
  scale_color_manual(values = pal, name = "Temperature") +
  facet_wrap(~ factor(area_f, rev(order$area)), ncol = 4) +
  labs(x = "Age (years)", y = "Predicted length (mm)") +
  theme(legend.position = "bottom")

# Difference in size-at-age?
# Warmed-scenario predictions, with length renamed during selection
warm <- est_sum |>
  filter(temp_cat == "Mean + 2°C") |>
  dplyr::select(area_f, length_mm_warm = length_mm, age_bc)
filter (grouped): removed 410 rows (50%), 410 rows remaining
rename: renamed one variable (length_mm_warm)
# Reference-scenario predictions, with length renamed during selection
ref <- est_sum |>
  filter(temp_cat == "Mean °C") |>
  dplyr::select(area_f, length_mm_ref = length_mm, age_bc)
filter (grouped): removed 410 rows (50%), 410 rows remaining
rename: renamed one variable (length_mm_ref)
# Warm minus reference length at each area and age
delta <- warm |>
  left_join(ref, by = c("area_f", "age_bc")) |>
  mutate(diff = length_mm_warm - length_mm_ref)
left_join: added one column (length_mm_ref)
           > rows only in x     0
           > rows only in y  (  0)
           > matched rows     410
           >                 =====
           > rows total       410
mutate (grouped): new variable 'diff' (double) with 401 unique values and 0% NA
# Predicted change in length-at-age under the warmed scenario, by area.
# FIX: the original y-axis label said "3 st.dev. increase in temperature",
# but the scenario is temp + 3 in °C (see `temp2 = temp + 3`, where temp is
# the unscaled mean site temperature), so the label is corrected.
ggplot(delta, aes(age_bc, diff, color = factor(area_f, order$area))) +
  geom_line() +
  coord_cartesian(xlim = c(0, 12)) +
  labs(
    x = "Age", color = "Area",
    y = "Difference in size-at-age with a 3°C increase in temperature (mm)"
  ) +
  scale_color_manual(values = colors)